#include <linux/bootmem.h>
#include <linux/nodemask.h>
+#ifdef XEN
+#include <linux/init.h>
+#endif
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/module.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
+#ifndef XEN
#include <asm/sn/pcidev.h>
+#endif
#include <asm/sn/simulator.h>
#include <asm/sn/sn_sal.h>
+#ifndef XEN
#include <asm/sn/tioca_provider.h>
#include <asm/sn/tioce_provider.h>
+#endif
+#ifdef XEN
+#include "asm/sn/hubdev.h"
+#include "asm/sn/xwidgetdev.h"
+#else
#include "xtalk/hubdev.h"
#include "xtalk/xwidgetdev.h"
+#endif
extern void sn_init_cpei_timer(void);
extern void register_sn_procfs(void);
+#ifdef XEN
+extern void sn_irq_lh_init(void);
+#endif
static struct list_head sn_sysdata_list;
struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
+#ifndef XEN
static int max_segment_number; /* Default highest segment number */
static int max_pcibus_number = 255; /* Default highest pci bus number */
.dma_unmap = sn_default_pci_unmap,
.bus_fixup = sn_default_pci_bus_fixup,
};
+#endif
/*
* Retrieve the DMA Flush List given nasid, widget, and device.
return ret_stuff.v0;
}
+#ifndef XEN
/*
* Retrieve the pci device information given the bus and device|function number.
*/
nasid, widget, device,
(u64)(dev_entry->common));
else
+#ifdef XEN
+ BUG();
+#else
status = sn_device_fixup_war(nasid,
widget, device,
dev_entry->common);
+#endif
if (status != SALRET_OK)
panic("SAL call failed: %s\n",
ia64_sal_strerror(status));
}
return;
}
+#endif
/*
* Ugly hack to get PCI setup until we have a proper ACPI namespace.
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
return 0;
+#ifndef XEN
/*
* prime sn_pci_provider[]. Individial provider init routines will
* override their respective default entries.
pcibr_init_provider();
tioca_init_provider();
tioce_init_provider();
+#endif
/*
* This is needed to avoid bounce limit checks in the blk layer
*/
ia64_max_iommu_merge_mask = ~PAGE_MASK;
+#ifndef XEN
sn_fixup_ionodes();
+#endif
sn_irq_lh_init();
INIT_LIST_HEAD(&sn_sysdata_list);
+#ifndef XEN
sn_init_cpei_timer();
#ifdef CONFIG_PROC_FS
while ((pci_dev =
pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
sn_pci_fixup_slot(pci_dev);
+#endif
sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */
{
struct hubdev_info *hubdev_info;
int size;
+#ifndef XEN
pg_data_t *pg;
+#else
+ struct pglist_data *pg;
+#endif
size = sizeof(struct hubdev_info);
return hubdev->hdi_geoid;
}
+#ifndef XEN
void sn_generate_path(struct pci_bus *pci_bus, char *address)
{
nasid_t nasid;
(bricktype == L1_BRICKTYPE_1932))
sprintf(address, "%s^%d", address, geo_slot(geoid));
}
+#endif
+#ifdef XEN
+__initcall(sn_pci_init);
+#else
subsys_initcall(sn_pci_init);
+#endif
+#ifndef XEN
EXPORT_SYMBOL(sn_pci_fixup_slot);
EXPORT_SYMBOL(sn_pci_unfixup_slot);
EXPORT_SYMBOL(sn_pci_controller_fixup);
EXPORT_SYMBOL(sn_bus_store_sysdata);
EXPORT_SYMBOL(sn_bus_free_sysdata);
EXPORT_SYMBOL(sn_generate_path);
+#endif
#include <linux/module.h>
#include <asm/io.h>
#include <asm/delay.h>
+#ifndef XEN
#include <asm/vga.h>
+#endif
#include <asm/sn/nodepda.h>
#include <asm/sn/simulator.h>
#include <asm/sn/pda.h>
#define IS_LEGACY_VGA_IOPORT(p) \
(((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
+#ifdef XEN
+#define vga_console_iobase 0
+#endif
+
/**
* sn_io_addr - convert an in/out port to an i/o address
* @port: port to convert
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/init.h>
+#ifdef XEN
+#include <linux/pci.h>
+#include <asm/hw_irq.h>
+#endif
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
+#ifndef XEN
#include <asm/sn/pcidev.h>
+#endif
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
+#ifdef XEN
+#define move_native_irq(foo) do {} while(0)
+#endif
+
static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
force_interrupt(irq);
}
+#ifndef XEN
static void sn_irq_info_free(struct rcu_head *head);
struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
(pci_provider->target_interrupt)(new_irq_info);
spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+ list_replace(&sn_irq_info->list, &new_irq_info->list);
+#else
list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+#endif
spin_unlock(&sn_irq_info_lock);
+#ifndef XEN
call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+#endif
#ifdef CONFIG_SMP
set_irq_affinity_info((vector & 0xff), cpuphys, 0);
sn_irq_lh[irq], list)
(void)sn_retarget_vector(sn_irq_info, nasid, slice);
}
+#endif
struct hw_interrupt_type irq_type_sn = {
+#ifndef XEN
.name = "SN hub",
+#endif
.startup = sn_startup_irq,
.shutdown = sn_shutdown_irq,
.enable = sn_enable_irq,
.disable = sn_disable_irq,
.ack = sn_ack_irq,
.end = sn_end_irq,
+#ifndef XEN
.set_affinity = sn_set_affinity_irq
+#endif
};
unsigned int sn_local_vector_to_irq(u8 vector)
int i;
irq_desc_t *base_desc = irq_desc;
+#ifndef XEN
ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
base_desc[i].chip = &irq_type_sn;
}
}
+#endif
}
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
struct sn_irq_info *tmp_irq_info;
int i, foundmatch;
+#ifndef XEN
rcu_read_lock();
+#else
+ spin_lock(&sn_irq_info_lock);
+#endif
if (pdacpu(cpu)->sn_last_irq == irq) {
foundmatch = 0;
for (i = pdacpu(cpu)->sn_last_irq - 1;
i && !foundmatch; i--) {
+#ifdef XEN
+ list_for_each_entry(tmp_irq_info,
+ sn_irq_lh[i],
+ list) {
+#else
list_for_each_entry_rcu(tmp_irq_info,
sn_irq_lh[i],
list) {
+#endif
if (tmp_irq_info->irq_cpuid == cpu) {
foundmatch = 1;
break;
foundmatch = 0;
for (i = pdacpu(cpu)->sn_first_irq + 1;
i < NR_IRQS && !foundmatch; i++) {
+#ifdef XEN
+ list_for_each_entry(tmp_irq_info,
+ sn_irq_lh[i],
+ list) {
+#else
list_for_each_entry_rcu(tmp_irq_info,
sn_irq_lh[i],
list) {
+#endif
if (tmp_irq_info->irq_cpuid == cpu) {
foundmatch = 1;
break;
}
pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
}
+#ifndef XEN
rcu_read_unlock();
+#else
+ spin_unlock(&sn_irq_info_lock);
+#endif
}
+#ifndef XEN
static void sn_irq_info_free(struct rcu_head *head)
{
struct sn_irq_info *sn_irq_info;
sn_irq_info = container_of(head, struct sn_irq_info, rcu);
kfree(sn_irq_info);
}
+#endif
+#ifndef XEN
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
nasid_t nasid = sn_irq_info->irq_nasid;
/* link it into the sn_irq[irq] list */
spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+ list_add(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+#else
list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+#endif
+#ifndef XEN
reserve_irq_vector(sn_irq_info->irq_irq);
+#endif
spin_unlock(&sn_irq_info_lock);
register_intr_pda(sn_irq_info);
unregister_intr_pda(sn_irq_info);
spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+ list_del(&sn_irq_info->list);
+#else
list_del_rcu(&sn_irq_info->list);
+#endif
spin_unlock(&sn_irq_info_lock);
if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
free_irq_vector(sn_irq_info->irq_irq);
+#ifndef XEN
call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+#endif
pci_dev_put(pci_dev);
}
+#endif
static inline void
sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
{
struct sn_irq_info *sn_irq_info;
+#ifndef XEN
if (!sn_ioif_inited)
return;
+#endif
+#ifdef XEN
+ spin_lock(&sn_irq_info_lock);
+#else
rcu_read_lock();
+#endif
+#ifdef XEN
+ list_for_each_entry(sn_irq_info, sn_irq_lh[irq], list)
+#else
list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
+#endif
sn_call_force_intr_provider(sn_irq_info);
+#ifdef XEN
+ spin_unlock(&sn_irq_info_lock);
+#else
rcu_read_unlock();
+#endif
}
+#ifndef XEN
/*
* Check for lost interrupts. If the PIC int_status reg. says that
* an interrupt has been sent, but not handled, and the interrupt
}
sn_irq_info->irq_last_intr = regval;
}
+#endif
void sn_lb_int_war_check(void)
{
struct sn_irq_info *sn_irq_info;
int i;
+#ifndef XEN
+#ifdef XEN
+ if (pda->sn_first_irq == 0)
+#else
if (!sn_ioif_inited || pda->sn_first_irq == 0)
+#endif
return;
+#ifdef XEN
+ spin_lock(&sn_irq_info_lock);
+#else
rcu_read_lock();
+#endif
for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
+#ifdef XEN
+ list_for_each_entry(sn_irq_info, sn_irq_lh[i], list) {
+#else
list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
+#endif
sn_check_intr(i, sn_irq_info);
}
}
+#ifdef XEN
+ spin_unlock(&sn_irq_info_lock);
+#else
rcu_read_unlock();
+#endif
+#endif
}
void __init sn_irq_lh_init(void)
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#ifndef XEN
#include <linux/kdev_t.h>
+#endif
#include <linux/string.h>
+#ifndef XEN
#include <linux/screen_info.h>
+#endif
#include <linux/console.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/acpi.h>
#include <linux/compiler.h>
#include <linux/sched.h>
+#ifndef XEN
#include <linux/root_dev.h>
+#endif
#include <linux/nodemask.h>
#include <linux/pm.h>
#include <linux/efi.h>
#include <asm/machvec.h>
#include <asm/system.h>
#include <asm/processor.h>
+#ifndef XEN
#include <asm/vga.h>
+#endif
#include <asm/sn/arch.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/simulator.h>
#include <asm/sn/leds.h>
+#ifndef XEN
#include <asm/sn/bte.h>
+#endif
#include <asm/sn/shub_mmr.h>
+#ifndef XEN
#include <asm/sn/clksupport.h>
+#endif
#include <asm/sn/sn_sal.h>
#include <asm/sn/geo.h>
#include <asm/sn/sn_feature_sets.h>
+#ifndef XEN
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
+#else
+#include "asm/sn/xwidgetdev.h"
+#include "asm/sn/hubdev.h"
+#endif
#include <asm/sn/klconfig.h>
+#ifdef XEN
+#include <asm/sn/shubio.h>
+
+/* Xen has no clue about NUMA .... grrrr */
+#define pxm_to_node(foo) 0
+#define node_to_pxm(foo) 0
+#define numa_node_id() 0
+#endif
DEFINE_PER_CPU(struct pda_s, pda_percpu);
static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
+#ifndef XEN
/*
* The format of "screen_info" is strange, and due to early i386-setup
* code. This is just enough to make the console code think we're on a
.orig_video_isVGA = 1,
.orig_video_points = 16
};
+#endif
/*
* This routine can only be used during init, since
}
}
+#ifndef XEN
/*
* Scan the EFI PCDP table (if it exists) for an acceptable VGA console
* output device. If one exists, pick it and set sn_legacy_{io,mem} to
return (rtc_now - sn2_rtc_initial) *
(1000000000 / sn_rtc_cycles_per_second);
}
+#endif
/**
* sn_setup - SN platform setup routine
* the RTC frequency (via a SAL call), initializing secondary CPUs, and
* setting up per-node data areas. The console is also initialized here.
*/
+#ifdef XEN
+void __cpuinit sn_cpu_init(void);
+#endif
+
void __init sn_setup(char **cmdline_p)
{
long status, ticks_per_sec, drift;
u32 version = sn_sal_rev();
+#ifndef XEN
extern void sn_cpu_init(void);
sn2_rtc_initial = rtc_time();
#endif /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
+#endif
/*
* Build the tables for managing cnodes.
sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
} else
sn_rtc_cycles_per_second = ticks_per_sec;
+#ifndef XEN
platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
ia64_printk_clock = ia64_sn2_printk_clock;
+#endif
printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
* we set the default root device to /dev/hda
* to make simulation easy
*/
+#ifndef XEN
ROOT_DEV = Root_HDA1;
+#endif
/*
* Create the PDAs and NODEPDAs for all the cpus.
*/
sn_init_pdas(cmdline_p);
+#ifndef XEN
ia64_mark_idle = &snidle;
+#endif
/*
* For the bootcpu, we do this here. All other cpus will make the
*/
sn_cpu_init();
+#ifndef XEN
#ifdef CONFIG_SMP
init_smp_config();
#endif
*/
pm_power_off = ia64_sn_power_down;
current->thread.flags |= IA64_THREAD_MIGRATION;
+#endif
}
/**
memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
sizeof(nodepdaindr));
+#ifndef XEN
/*
* Set up IO related platform-dependent nodepda fields.
* The following routine actually sets up the hubinfo struct
for (cnode = 0; cnode < num_cnodes; cnode++) {
hubdev_init_node(nodepdaindr[cnode], cnode);
}
+#endif
}
/**
static int wars_have_been_checked;
cpuid = smp_processor_id();
+#ifndef XEN
if (cpuid == 0 && IS_MEDUSA()) {
if (ia64_sn_is_fake_prom())
sn_prom_type = 2;
printk(KERN_INFO "Running on medusa with %s PROM\n",
(sn_prom_type == 1) ? "real" : "fake");
}
+#endif
memset(pda, 0, sizeof(pda));
if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
}
+#ifndef XEN /* local_node_data is not allocated .... yet */
/*
* WAR addresses for SHUB 1.x.
*/
(volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
SH1_PI_CAM_CONTROL);
}
+#endif
}
/*
#include <asm/numa.h>
#include <asm/hw_irq.h>
#include <asm/current.h>
+#ifdef XEN
+#include <asm/sn/arch.h>
+#endif
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0)
#define max_active_pio(sh1) ((sh1) ? 32 : 7)
#define reset_max_active_on_deadlock() 1
+#ifndef XEN
#define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
+#else
+#define PTC_LOCK(sh1) &sn2_global_ptc_lock
+#endif
struct ptc_stats {
unsigned long ptc_l;
return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
}
+#ifndef XEN /* No idea if Xen will ever support this */
/**
* sn_migrate - SN-specific task migration actions
* @task: Task being migrated to new CPU
void sn_tlb_migrate_finish(struct mm_struct *mm)
{
/* flush_tlb_mm is inefficient if more than 1 users of mm */
+#ifndef XEN
if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
+#else
+	if (mm == &current->arch.mm && mm && atomic_read(&mm->mm_users) == 1)
+#endif
flush_tlb_mm(mm);
}
+#endif
/**
* sn2_global_tlb_purge - globally purge translation cache of virtual address range
* done with ptc.g/MMRs under protection of the global ptc_lock.
*/
+#ifdef XEN /* Xen is soooooooo stupid! */
+static cpumask_t mask_all = CPU_MASK_ALL;
+#endif
+
void
+#ifndef XEN
sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
+#else
+sn2_global_tlb_purge(unsigned long start,
+#endif
unsigned long end, unsigned long nbits)
{
int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
+#ifndef XEN
int mymm = (mm == current->active_mm && mm == current->mm);
+#else
+ struct mm_struct *mm;
+ int mymm = 1;
+#endif
int use_cpu_ptcga;
volatile unsigned long *ptc0, *ptc1;
unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
nodes_clear(nodes_flushed);
i = 0;
+#ifndef XEN /* One day Xen will grow up! */
for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
cnode = cpu_to_node(cpu);
node_set(cnode, nodes_flushed);
lcpu = cpu;
i++;
}
+#else
+ for_each_cpu(cpu) {
+ cnode = cpu_to_node(cpu);
+ node_set(cnode, nodes_flushed);
+ lcpu = cpu;
+ i++;
+ }
+#endif
if (i == 0)
return;
return;
}
+#ifndef XEN
if (atomic_read(&mm->mm_users) == 1 && mymm) {
+#ifndef XEN /* I hate Xen! */
flush_tlb_mm(mm);
+#else
+ flush_tlb_mask(mask_all);
+#endif
__get_cpu_var(ptcstats).change_rid++;
preempt_enable();
return;
}
+#endif
itc = ia64_get_itc();
nix = 0;
for_each_node_mask(cnode, nodes_flushed)
nasids[nix++] = cnodeid_to_nasid(cnode);
+#ifndef XEN
rr_value = (mm->context << 3) | REGION_NUMBER(start);
+#else
+ rr_value = REGION_NUMBER(start);
+#endif
shub1 = is_shub1();
if (shub1) {
(nbits << SH1_PTC_0_PS_SHFT) |
(rr_value << SH1_PTC_0_RID_SHFT) |
(1UL << SH1_PTC_0_START_SHFT);
+#ifndef XEN
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
+#else
+ ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
+ ptc1 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
+#endif
} else {
data0 = (1UL << SH2_PTC_A_SHFT) |
(nbits << SH2_PTC_PS_SHFT) |
(1UL << SH2_PTC_START_SHFT);
+#ifndef XEN
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
+#else
+ ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
+#endif
(rr_value << SH2_PTC_RID_SHFT));
ptc1 = NULL;
}
if (unlikely(nasid == -1))
ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
- sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
+ sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
}
#ifdef CONFIG_PROC_FS
#include <xen/event.h>
#define apicid_to_phys_cpu_present(x) 1
+#ifdef CONFIG_IA64_GENERIC
+/* Identity mapping: on the generic build each ia64 vector is its own irq. */
+unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
+{
+	return (unsigned int) vec;
+}
+#endif
+
/*
* Linux has a controller-independent x86 interrupt architecture.
* every controller has a 'controller-template', that is used
// manufactured from component pieces
// defined in linux/arch/ia64/defconfig
-//#define CONFIG_IA64_GENERIC
-#define CONFIG_IA64_HP_SIM
+#define CONFIG_IA64_GENERIC
+#define CONFIG_HZ 32
+
#define CONFIG_IA64_L1_CACHE_SHIFT 7
// needed by include/asm-ia64/page.h
#define CONFIG_IA64_PAGE_SIZE_16KB // 4KB doesn't work?!?
// avoid redefining task_struct in asm/current.h
#define task_struct vcpu
-// linux/include/asm-ia64/machvec.h (linux/arch/ia64/lib/io.c)
-#define platform_inb __ia64_inb
-#define platform_inw __ia64_inw
-#define platform_inl __ia64_inl
-#define platform_outb __ia64_outb
-#define platform_outw __ia64_outw
-#define platform_outl __ia64_outl
-
#include <xen/cache.h>
#ifndef CONFIG_SMP
#define __cacheline_aligned_in_smp
// Deprivated linux inf and put here for short time compatibility
#define kmalloc(s, t) xmalloc_bytes((s))
#define kfree(s) xfree((s))
+/* Zero-initialized allocation shim mapping Linux kzalloc onto Xen's xmalloc. */
+#define kzalloc(size, flags) 				\
+({							\
+	unsigned char *mem;				\
+	mem = (unsigned char *)xmalloc_bytes(size);	\
+	if (mem)					\
+		memset(mem, 0, size);			\
+	(void *)mem;					\
+})
+/* Parenthesize args so e.g. kcalloc(a + b, sz, f) multiplies the full count. */
+#define kcalloc(n, size, flags)		kzalloc((n) * (size), flags)
+#define alloc_bootmem_node(pg, size)	xmalloc_bytes(size)
// see common/keyhandler.c
#define nop() asm volatile ("nop 0")
* the macros are used directly.
*/
#define platform_name "dig"
+#ifdef XEN
+/*
+ * All the World is a PC .... yay! yay! yay!
+ */
+extern ia64_mv_setup_t hpsim_setup;
+#define platform_setup hpsim_setup
+
+#define platform_dma_init machvec_noop
+#define platform_dma_alloc_coherent machvec_noop
+#define platform_dma_free_coherent machvec_noop
+#define platform_dma_map_single machvec_noop
+#define platform_dma_unmap_single machvec_noop
+#define platform_dma_map_sg machvec_noop
+#define platform_dma_unmap_sg machvec_noop
+#define platform_dma_sync_single_for_cpu machvec_noop
+#define platform_dma_sync_sg_for_cpu machvec_noop
+#define platform_dma_sync_single_for_device machvec_noop
+#define platform_dma_sync_sg_for_device machvec_noop
+#define platform_dma_mapping_error machvec_noop
+#define platform_dma_supported machvec_noop
+
+#define platform_pci_get_legacy_mem machvec_noop
+#define platform_pci_legacy_read machvec_noop
+#define platform_pci_legacy_write machvec_noop
+#else
#define platform_setup dig_setup
+#endif
#endif /* _ASM_IA64_MACHVEC_DIG_h */
* the macros are used directly.
*/
#define platform_name "hpzx1"
+#ifdef XEN
+extern ia64_mv_setup_t hpsim_setup;
+extern ia64_mv_irq_init_t hpsim_irq_init;
+#define platform_setup hpsim_setup
+#define platform_irq_init hpsim_irq_init
+
+#define platform_dma_init machvec_noop
+#define platform_dma_alloc_coherent machvec_noop
+#define platform_dma_free_coherent machvec_noop
+#define platform_dma_map_single machvec_noop
+#define platform_dma_unmap_single machvec_noop
+#define platform_dma_map_sg machvec_noop
+#define platform_dma_unmap_sg machvec_noop
+#define platform_dma_sync_single_for_cpu machvec_noop
+#define platform_dma_sync_sg_for_cpu machvec_noop
+#define platform_dma_sync_single_for_device machvec_noop
+#define platform_dma_sync_sg_for_device machvec_noop
+#define platform_dma_mapping_error machvec_noop
+#define platform_dma_supported machvec_noop
+
+#define platform_pci_get_legacy_mem machvec_noop
+#define platform_pci_legacy_read machvec_noop
+#define platform_pci_legacy_write machvec_noop
+#else
#define platform_setup dig_setup
#define platform_dma_init machvec_noop
#define platform_dma_alloc_coherent sba_alloc_coherent
#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
#define platform_dma_supported sba_dma_supported
#define platform_dma_mapping_error sba_dma_mapping_error
+#endif
#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
extern ia64_mv_dma_supported sn_dma_supported;
+#ifndef XEN
extern ia64_mv_migrate_t sn_migrate;
extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq;
+#endif
/*
#define platform_cpu_init sn_cpu_init
#define platform_irq_init sn_irq_init
#define platform_send_ipi sn2_send_IPI
+#ifndef XEN
#define platform_timer_interrupt sn_timer_interrupt
+#endif
#define platform_global_tlb_purge sn2_global_tlb_purge
+#ifndef XEN
#define platform_tlb_migrate_finish sn_tlb_migrate_finish
+#endif
#define platform_pci_fixup sn_pci_fixup
#define platform_inb __sn_inb
#define platform_inw __sn_inw
#define platform_readl_relaxed __sn_readl_relaxed
#define platform_readq_relaxed __sn_readq_relaxed
#define platform_local_vector_to_irq sn_local_vector_to_irq
+#ifdef XEN
+#define platform_pci_get_legacy_mem machvec_noop
+#define platform_pci_legacy_read machvec_noop
+#define platform_pci_legacy_write machvec_noop
+#else
#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
#define platform_pci_legacy_read sn_pci_legacy_read
#define platform_pci_legacy_write sn_pci_legacy_write
+#endif
#define platform_dma_init machvec_noop
+#ifdef XEN
+#define platform_dma_alloc_coherent machvec_noop
+#define platform_dma_free_coherent machvec_noop
+#define platform_dma_map_single machvec_noop
+#define platform_dma_unmap_single machvec_noop
+#define platform_dma_map_sg machvec_noop
+#define platform_dma_unmap_sg machvec_noop
+#define platform_dma_sync_single_for_cpu machvec_noop
+#define platform_dma_sync_sg_for_cpu machvec_noop
+#define platform_dma_sync_single_for_device machvec_noop
+#define platform_dma_sync_sg_for_device machvec_noop
+#define platform_dma_mapping_error machvec_noop
+#define platform_dma_supported machvec_noop
+#else
#define platform_dma_alloc_coherent sn_dma_alloc_coherent
#define platform_dma_free_coherent sn_dma_free_coherent
#define platform_dma_map_single sn_dma_map_single
#define platform_dma_mapping_error sn_dma_mapping_error
#define platform_dma_supported sn_dma_supported
#define platform_migrate sn_migrate
+#endif
+
+#ifndef XEN
#ifdef CONFIG_PCI_MSI
#define platform_setup_msi_irq sn_setup_msi_irq
#define platform_teardown_msi_irq sn_teardown_msi_irq
#define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL)
#define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
#endif
+#endif
#include <asm/sn/io.h>
#include <asm/intrinsics.h>
#include <asm/types.h>
+#ifdef XEN /* This will go away with newer upstream */
+#define RGN_SHIFT	61
+/* Parenthesize the arg so RGN_BASE(-1) and compound expressions expand safely. */
+#define RGN_BASE(r)	((r) << RGN_SHIFT)
+#define RGN_BITS	RGN_BASE(-1)
+#define RGN_HPAGE	REGION_HPAGE
+#ifndef CONFIG_HUGETLB_PAGE
+# define REGION_HPAGE	(4UL)
+#endif
+#endif
+
/*
* PAGE_SHIFT determines the actual kernel page size.
*/
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
+#ifdef XEN
+#include <linux/ioport.h>
+#endif
#include <asm/io.h>
+#ifndef XEN
#include <asm/scatterlist.h>
+#endif
/*
* Can be used to override the logic in pci_scan_bus for skipping already-configured bus
#define HAVE_ARCH_PCI_MWI 1
extern int pcibios_prep_mwi (struct pci_dev *);
+#ifndef XEN
#include <asm-generic/pci-dma-compat.h>
+#endif
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
struct vm_area_struct *vma);
+#ifndef XEN
extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
size_t count);
extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
extern int pci_mmap_legacy_mem(struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma);
+#endif
#define pci_get_legacy_mem platform_pci_get_legacy_mem
#define pci_legacy_read platform_pci_legacy_read
extern void pcibios_bus_to_resource(struct pci_dev *dev,
struct resource *res, struct pci_bus_region *region);
+#ifndef XEN
static inline struct resource *
pcibios_select_root(struct pci_dev *pdev, struct resource *res)
{
return root;
}
+#endif
#define pcibios_scan_all_fns(a, b) 0
*/
#define TO_PHYS(x) (TO_PHYS_MASK & (x))
#define TO_CAC(x) (CAC_BASE | TO_PHYS(x))
-#ifdef CONFIG_SGI_SN
+#if defined(CONFIG_SGI_SN) || defined(XEN)
#define TO_AMO(x) (AMO_BASE | TO_PHYS(x))
#define TO_GET(x) (GET_BASE | TO_PHYS(x))
#else
#ifndef _ASM_IA64_SN_ARCH_H
#define _ASM_IA64_SN_ARCH_H
+#ifndef XEN
#include <linux/numa.h>
#include <asm/types.h>
#include <asm/percpu.h>
#include <asm/sn/types.h>
+#endif
#include <asm/sn/sn_cpuid.h>
/*
};
DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
#define sn_hub_info (&__get_cpu_var(__sn_hub_info))
+#ifndef XEN
#define is_shub2() (sn_hub_info->shub2)
#define is_shub1() (sn_hub_info->shub2 == 0)
+#else
+#define is_shub2() 0
+#define is_shub1() 1
+#endif
/*
* Use this macro to test if shub 1.1 wars should be enabled
DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
-
+#ifndef XEN
extern u8 sn_partition_id;
extern u8 sn_system_size;
extern u8 sn_sharing_domain_size;
extern u8 sn_region_size;
extern void sn_flush_all_caches(long addr, long bytes);
-
+#endif
#endif /* _ASM_IA64_SN_ARCH_H */
#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
#define _ASM_IA64_SN_XTALK_HUBDEV_H
+#ifndef XEN
#include "xtalk/xwidgetdev.h"
+#else
+#include <asm/sn/xwidgetdev.h>
+#endif
#define HUB_WIDGET_ID_MAX 0xf
#define DEV_PER_WIDGET (2*2*8)
#define __sn_mf_a() ia64_mfa()
+#ifdef XEN
+/*
+ * Xen doesn't deal with any PIC devices directly, it's all handled in dom0
+ */
+#define sn_dma_flush(foo) do {} while(0)
+#else
extern void sn_dma_flush(unsigned long);
+#endif
#define __sn_inb ___sn_inb
#define __sn_inw ___sn_inw
#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
+#ifdef XEN
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#endif
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
extern long pio_phys_read_mmr(volatile long *mmr);
extern void pio_phys_write_mmr(volatile long *mmr, long val);
+#ifndef XEN
extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
+#else
+extern void pio_atomic_phys_write_mmrs(volatile unsigned long *mmr1, long val1, volatile unsigned long *mmr2, long val2);
+#endif
#endif /* _ASM_IA64_SN_RW_MMR_H */
#ifdef XEN
#define local_irq_is_enabled() (!irqs_disabled())
extern struct vcpu *ia64_switch_to(struct vcpu *next_task);
+#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
#else
#ifdef __KERNEL__
/* driverfs interface for exporting bus attributes */
struct bus_attribute {
+#ifndef XEN
struct attribute attr;
+#endif
ssize_t (*show)(struct bus_type *, char * buf);
ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
};
/* driverfs interface for exporting driver attributes */
struct driver_attribute {
+#ifndef XEN
struct attribute attr;
+#endif
ssize_t (*show)(struct device_driver *, char * buf);
ssize_t (*store)(struct device_driver *, const char * buf, size_t count);
};
struct list_head children;
struct list_head devices;
struct list_head interfaces;
+#ifdef XEN
+ spinlock_t sem;
+#else
struct semaphore sem; /* locks both the children and interfaces lists */
+#endif
struct kobject *virtual_dir;
struct class_attribute {
+#ifndef XEN
struct attribute attr;
+#endif
ssize_t (*show)(struct class *, char * buf);
ssize_t (*store)(struct class *, const char * buf, size_t count);
};
extern void class_remove_file(struct class *, const struct class_attribute *);
struct class_device_attribute {
+#ifndef XEN
struct attribute attr;
+#endif
ssize_t (*show)(struct class_device *, char * buf);
ssize_t (*store)(struct class_device *, const char * buf, size_t count);
};
struct device_attribute uevent_attr;
struct device_attribute *devt_attr;
+#ifdef XEN
+ spinlock_t sem;
+#else
struct semaphore sem; /* semaphore to synchronize calls to
* its driver.
*/
+#endif
struct bus_type * bus; /* type of bus device is on */
struct device_driver *driver; /* which driver has allocated this
struct subsystem {
struct kset kset;
+#ifndef XEN
struct rw_semaphore rwsem;
+#endif
};
#define decl_subsys(_name,_type,_uevent_ops) \
}
struct subsys_attribute {
+#ifndef XEN
struct attribute attr;
+#endif
ssize_t (*show)(struct subsystem *, char *);
ssize_t (*store)(struct subsystem *, const char *, size_t);
};
/* Include the ID list */
#include <linux/pci_ids.h>
+#ifdef XEN
+#include <asm/processor.h>
+#endif
/*
* The PCI interface treats multi-function devices as independent
+++ /dev/null
-#ifndef _ASM_IA64_PCI_H
-#define _ASM_IA64_PCI_H
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/io.h>
-#include <asm/scatterlist.h>
-
-/*
- * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
- * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
- * loader.
- */
-#define pcibios_assign_all_busses() 0
-#define pcibios_scan_all_fns(a, b) 0
-
-#define PCIBIOS_MIN_IO 0x1000
-#define PCIBIOS_MIN_MEM 0x10000000
-
-void pcibios_config_init(void);
-
-struct pci_dev;
-
-/*
- * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
- * between device bus addresses and CPU physical addresses. Platforms with a hardware I/O
- * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
- * network device layers. Platforms with separate bus address spaces _must_ turn this off
- * and provide a device DMA mapping implementation that takes care of the necessary
- * address translation.
- *
- * For now, the ia64 platforms which may have separate/multiple bus address spaces all
- * have I/O MMUs which support the merging of physically discontiguous buffers, so we can
- * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
- */
-extern unsigned long ia64_max_iommu_merge_mask;
-#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL)
-
-static inline void
-pcibios_set_master (struct pci_dev *dev)
-{
- /* No special bus mastering setup handling */
-}
-
-static inline void
-pcibios_penalize_isa_irq (int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
-#define HAVE_ARCH_PCI_MWI 1
-extern int pcibios_prep_mwi (struct pci_dev *);
-
-#include <asm-generic/pci-dma-compat.h>
-
-/* pci_unmap_{single,page} is not a nop, thus... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
- dma_addr_t ADDR_NAME;
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
- __u32 LEN_NAME;
-#define pci_unmap_addr(PTR, ADDR_NAME) \
- ((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
- (((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME) \
- ((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
- (((PTR)->LEN_NAME) = (VAL))
-
-/* The ia64 platform always supports 64-bit addressing. */
-#define pci_dac_dma_supported(pci_dev, mask) (1)
-#define pci_dac_page_to_dma(dev,pg,off,dir) ((dma_addr_t) page_to_bus(pg) + (off))
-#define pci_dac_dma_to_page(dev,dma_addr) (virt_to_page(bus_to_virt(dma_addr)))
-#define pci_dac_dma_to_offset(dev,dma_addr) offset_in_page(dma_addr)
-#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir) do { } while (0)
-#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir) do { mb(); } while (0)
-
-#define sg_dma_len(sg) ((sg)->dma_length)
-#define sg_dma_address(sg) ((sg)->dma_address)
-
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
- enum pci_dma_burst_strategy *strat,
- unsigned long *strategy_parameter)
-{
- unsigned long cacheline_size;
- u8 byte;
-
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
- if (byte == 0)
- cacheline_size = 1024;
- else
- cacheline_size = (int) byte * 4;
-
- *strat = PCI_DMA_BURST_MULTIPLE;
- *strategy_parameter = cacheline_size;
-}
-#endif
-
-#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-#define HAVE_PCI_LEGACY
-extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
- struct vm_area_struct *vma);
-extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
- size_t count);
-extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
- size_t count);
-extern int pci_mmap_legacy_mem(struct kobject *kobj,
- struct bin_attribute *attr,
- struct vm_area_struct *vma);
-
-#define pci_get_legacy_mem platform_pci_get_legacy_mem
-#define pci_legacy_read platform_pci_legacy_read
-#define pci_legacy_write platform_pci_legacy_write
-
-struct pci_window {
- struct resource resource;
- u64 offset;
-};
-
-struct pci_controller {
- void *acpi_handle;
- void *iommu;
- int segment;
- int node; /* nearest node with memory or -1 for global allocation */
-
- unsigned int windows;
- struct pci_window *window;
-
- void *platform_data;
-};
-
-#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
-#define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment)
-
-extern struct pci_ops pci_root_ops;
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
- return (pci_domain_nr(bus) != 0);
-}
-
-static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-{
-}
-
-extern void pcibios_resource_to_bus(struct pci_dev *dev,
- struct pci_bus_region *region, struct resource *res);
-
-extern void pcibios_bus_to_resource(struct pci_dev *dev,
- struct resource *res, struct pci_bus_region *region);
-
-#define pcibios_scan_all_fns(a, b) 0
-
-#endif /* _ASM_IA64_PCI_H */